//-------------------------------------------------------------------------------------------------
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Page Heap management. Ruthlessly stolen from the C# team. Please notify [....] and [....]
// about any changes to this file. It is likely the change will need to be mirrored in the C#
// implementation.
//
//-------------------------------------------------------------------------------------------------

#include "StdAfx.h"

/* The biggest trickiness with the page heap is that it is inefficient
 * to allocate single pages from the operating system - NT will allocate
 * only on 64K boundaries, so allocating a 4K page is needlessly inefficient.
 * We use the ability to reserve then commit pages to reserve moderately
 * large chunks of memory (a PageArena), then commit pages in the arena.
 * This also allows us to track pages allocated and detect leaks.
 */

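/* Illustrative sketch (not part of the build) of the reserve-then-commit pattern this
 * heap is built on. The sizes used here are assumptions for the example only.
 */
#if 0
    // Reserve a 1MB range of address space without backing it with memory yet.
    void* arena = VirtualAlloc(NULL, 1024 * 1024, MEM_RESERVE, PAGE_READWRITE);

    // Commit a single 4K page inside the reserved range when it is actually needed.
    void* page = VirtualAlloc(arena, 4096, MEM_COMMIT, PAGE_READWRITE);

    // Later: give the physical memory back but keep the address space reserved...
    VirtualFree(page, 4096, MEM_DECOMMIT);

    // ...and eventually release the whole reservation.
    VirtualFree(arena, 0, MEM_RELEASE);
#endif
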
/*
 * static data members
 */
size_t PageHeap::pageSize;       // The system page size.
int PageHeap::pageShift;         // log2 of the page size
bool PageHeap::reliableCommit;   // MEM_COMMIT reliable?

size_t GetSystemPageSize()
{
    static size_t g_pageSize;

    if (!g_pageSize)
    {
        // Get the system page size.
        SYSTEM_INFO sysinfo;
        GetSystemInfo(&sysinfo);
        g_pageSize = sysinfo.dwPageSize;
    }

    return g_pageSize;
}

void PageHeap::StaticInit()
{
    // First time through -- get system page size.
    if (!PageHeap::pageSize)
    {
        // Get the system page size.
        PageHeap::pageSize = GetSystemPageSize();

        // Determine the page shift.
        int shift = 0;
        size_t size = PageHeap::pageSize;
        while (size != 1)
        {
            shift += 1;
            size >>= 1;
        }
        PageHeap::pageShift = shift;

        VSASSERT((size_t)(1 << PageHeap::pageShift) == PageHeap::pageSize, "Invalid");

        OSVERSIONINFO osvi;
        osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
        BOOL ok = GetVersionEx(&osvi);
        VSASSERT(ok, "Invalid");
        reliableCommit = ok && osvi.dwMajorVersion >= 5;
    }
}

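// For example, with the common 4K system page size, pageSize == 4096 and pageShift == 12,
// so expressions such as "cPages << pageShift" and "sz >> pageShift" below convert between
// page counts and byte sizes without a multiply or divide.
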
void PageHeap::PageArena::FreeAddressSpace()
{
    VirtualFree(pages, 0, MEM_RELEASE);
    pages = NULL;
    size = 0;
}

bool PageHeap::PageArena::HasUsedPages() const
{
    for (unsigned int i = 0; i < PAGES_PER_ARENA / BITS_DWORD; i++)
    {
        if (used[i])
            return true;
    }

    return false;
}

PageHeap::PageHeap() :
    m_pageCurUse(0),
    m_pageMaxUse(0),
    m_pageCurReserve(0),
    m_pageMaxReserve(0),
    arenaList(NULL),
    arenaLast(NULL),
    singlePageArenaList(NULL),
    singlePageArenaLast(NULL),
    whatIsProtected(ProtectedEntityFlags::Nothing)
{
    CTinyGate gate(&lock); // Acquire the lock
    StaticInit();
}

/*
 * Destructor. Free everything.
 */
PageHeap::~PageHeap()
{
    CTinyGate gate(&lock); // Acquire the lock
    FreeAllPages();
}

void PageHeap::ShrinkUnusedResources()
{
    // [....] The ordering of these calls is important. First free all possible
    // pages. That may leave unused arenas, which are harvested by the second call.
    DecommitUnusedPages();
    FreeUnusedArenas();

#if NRLSTRACK
    // See where our remaining pages were allocated from.
    CComBSTR str;
    long count = 0;
    long totmem = 1; // For thousands of allocators this is very expensive on the idle thread, so just get totals.
    str = g_NrlsAllocTracker->GetAllocatorStatusReport(&count, &totmem);
#endif
}

// [....] Search a segment of pages in an arena for cPages contiguous free pages.
int PageHeap::PageArena::LookForPages(unsigned int cPages, int indexPageBegin, int indexLastValidPage)
{
    unsigned int cPagesRemaining = cPages;
    while (true)
    {
        // Scan to see if cPages pages starting at indexPageBegin are not in use.
        if (!(indexPageBegin & DWORD_BIT_MASK))
        {
            // The current page is on a DWORD boundary, use an optimized check.
            // Search this area for free pages. First, find a dword that isn't all used.
            // This loop quickly skips (32 at a time) the used pages.
            int dwIndexByDword;
            for (dwIndexByDword = indexPageBegin / BITS_DWORD;
                 dwIndexByDword <= indexLastValidPage / BITS_DWORD;
                 ++dwIndexByDword)
            {
                if (used[dwIndexByDword] != 0xFFFFFFFF)
                {
                    break; // Not all used.
                }
                else
                {
                    // All of these pages are used; reset the counter.
                    cPagesRemaining = cPages;
                }
            }

            indexPageBegin = dwIndexByDword * BITS_DWORD;
        }

        // Did the loop take us beyond the last valid page? If so,
        // this allocation request can't be fulfilled from this range.
        if (indexPageBegin > indexLastValidPage)
        {
            return -1;
        }

        if (IsPageUsed(indexPageBegin))
        {
            cPagesRemaining = cPages;
            indexPageBegin++;
        }
        else
        {
            indexPageBegin++;
            cPagesRemaining--;
        }

        if (!cPagesRemaining)
        {
            return indexPageBegin - cPages;
        }
    }

    return -1;
}

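// Note on the bitmap layout used by LookForPages above: each DWORD in used[] (and in the
// parallel committed[] bitmap) tracks 32 pages, so with the assumed constants BITS_DWORD == 32,
// DWORD_BIT_SHIFT == 5 and DWORD_BIT_MASK == 31, page i maps to bit (i & DWORD_BIT_MASK) of
// used[i >> DWORD_BIT_SHIFT]. That is why a fully used group of 32 pages can be skipped with a
// single compare against 0xFFFFFFFF.
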
void * PageHeap::PageArena::AllocPagesHelper(int iPage, unsigned int cPages, PageHeap& parent)
{
    size_t cBytes = cPages << pageShift;
    void* p = (BYTE *)pages + (iPage << pageShift); // Calculate address of allocation.
    bool allCommitted = true;

    for (unsigned i = 0; i < cPages && allCommitted; i++)
    {
        if (! (IsPageCommitted(iPage + i)))
            allCommitted = false;
    }

    // Commit the pages from the OS if needed.
    if (!allCommitted)
    {
        if (VirtualAlloc(p, cBytes, MEM_COMMIT, PAGE_READWRITE) != p)
        {
            // If the system couldn't commit the pages then we're in
            // trouble. Presumably it is out of physical memory. Here we
            // make a last-ditch effort to continue by freeing up unused
            // pages in the hope that we'll then be able to commit.
            if (!parent.DecommitUnusedPages() ||
                VirtualAlloc(p, cBytes, MEM_COMMIT, PAGE_READWRITE) != p)
            {
                VbThrow(GetLastHResultError());
            }
        }
    }

    if (!reliableCommit || allCommitted)
    {
        // On Win9x the above call to VirtualAlloc does not leave the memory writeable.
        PageProtect::AllowWrite(ProtectedEntityFlags::UnusedMemory, p, cBytes);
    }

    // Mark the pages as in use and committed.
    unsigned int c = cPages;
    while (c--)
    {
        VSASSERT(! (IsPageUsed(iPage)), "Invalid");

        MarkPageUsed(iPage);
        MarkPageCommitted(iPage);

        ++iPage;
    }

#ifdef DEBUG
    // Make sure the pages aren't zero filled.
    memset(p, 0xCC, cBytes);
#endif //DEBUG

    return p;
}

void* PageHeap::PageArena::AllocPages(unsigned int cPages, PageHeap& parent)
{
    // [....] If SPREAD_ALLOCATIONS is defined, minimize the reuse of address space.
    // Do this so that pointers to address space that has been allocated
    // and then freed more likely point to either decommitted or protected pages.
    // Otherwise, if we continue to reuse the same address space, dangling pointers
    // will very possibly be pointing to accessible memory, allowing reads and
    // writes through them to bogus data.

    unsigned int iWhereToBeginPageSearch =
#ifdef SPREAD_ALLOCATIONS
        m_iStartNextAlloc; // Begin the search at the page after the last allocation.
#else
        0;
#endif

    int iPage = LookForPages((unsigned int)cPages, iWhereToBeginPageSearch, PAGES_PER_ARENA - 1);

#ifdef SPREAD_ALLOCATIONS
    if (-1 == iPage)
    {
        // The previous search began at position N, at which point cPages - 1 pages may have
        // been free, but the next page was used and therefore the search didn't
        // succeed. Therefore look from the beginning up to N + cPages - 1 so as not to
        // leave a potential hole of cPages - 1 pages.
        iPage = LookForPages((unsigned int)cPages, 0,
            min(m_iStartNextAlloc + (unsigned int)cPages - 1, PAGES_PER_ARENA - 1));
    }
#endif

    if (-1 != iPage)
    {
#ifdef SPREAD_ALLOCATIONS
        // Success, we found contiguous free pages.
        m_iStartNextAlloc = iPage + cPages;
#endif

        void* p = AllocPagesHelper(iPage, cPages, parent);

        // Return the address of the allocated pages.
        return p;
    }
    else
    {
        return NULL;
    }
}

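// Worked example of the wrap-around search in PageArena::AllocPages above (hypothetical
// numbers): with PAGES_PER_ARENA == 1024, cPages == 4 and m_iStartNextAlloc == 1022, the
// first search can only inspect pages 1022-1023 and fails. The second search then scans
// pages 0 through min(1022 + 4 - 1, 1023) = 1023, so a free run such as pages 1020-1023
// is still found instead of being left behind as an unusable hole.
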
/*
 * Allocate a set of pages from the page heap. Memory is not
 * zero-filled.
 */
void * PageHeap::AllocPages(size_t sz)
{
    CTinyGate gate(&lock); // Acquire the lock

    VSASSERT(sz % pageSize == 0 && sz != 0, "Invalid"); // Must be a page size multiple.

    m_pageCurUse += sz / pageSize;
    if (m_pageCurUse > m_pageMaxUse)
        m_pageMaxUse = m_pageCurUse;

    // How many pages are being allocated?
    size_t cPages = (sz >> pageShift);

    // Handle single page alloc requests in an optimized fashion.
    // This case is taken >99% of the time.
    if (cPages == 1)
    {
        return SinglePageAlloc();
    }

    // Handle very large allocations differently.
    if (sz > BIGALLOC_SIZE)
    {
        return LargeAlloc(sz);
    }

    void * p;
    PageArena * arena;

    // Check each arena in turn for enough contiguous pages.
    for (arena = arenaList; arena != NULL; arena = arena->nextArena)
    {
        if (arena->type == LargeAllocation)
            continue; // Large allocation arenas are not interesting.

        if (p = arena->AllocPages((unsigned int)cPages, *this))
        {
            return p;
        }
    }

    // No arenas have enough free space. Create a new arena and allocate
    // at the beginning of that arena.
    arena = CreateArena(Normal, PAGES_PER_ARENA * pageSize);

    p = arena->AllocPages((unsigned int)cPages, *this);

    return p;
}

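/* Illustrative usage sketch (not part of the build; the caller, variable names and sizes
 * here are assumptions, not code from this file):
 */
#if 0
    PageHeap heap;
    size_t cb = 4 * GetSystemPageSize();    // size must be a nonzero page-size multiple
    void* mem = heap.AllocPages(cb);        // contents are NOT zero-filled

    // ... use mem ...

    heap.FreePages(ProtectedEntityFlags::Nothing, mem, cb); // must pass the original size
    heap.ShrinkUnusedResources();           // optionally return unused pages/arenas to the OS
#endif
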
template <typename T>
void PageHeap::RemoveArena(const T* goingAway, T*& containingArenaList, T*& containingArenaListLast)
{
    TemplateUtil::CompileAssertIsChildOf<PageArena,T>();

    // Remove the arena from the arena list.
    if (containingArenaList == goingAway)
    {
        containingArenaList = (T*) goingAway->nextArena;
        if (containingArenaListLast == goingAway)
            containingArenaListLast = NULL;
    }
    else
    {
        T* arenaPrev;

        // Find the arena just before the one we want to remove.
        for (arenaPrev = containingArenaList; arenaPrev->nextArena != goingAway; arenaPrev = (T*) arenaPrev->nextArena)
            ;

        VSASSERT(arenaPrev->nextArena == goingAway, "Invalid");
        arenaPrev->nextArena = (T*) goingAway->nextArena;
        if (containingArenaListLast == goingAway)
            containingArenaListLast = arenaPrev;
    }
}

void PageHeap::FreeUnusedArenas()
{
    CTinyGate gate(&lock);

    PageArena* nextArena = NULL;

    for (PageArena* arena = arenaList; arena != NULL; arena = nextArena)
    {
        nextArena = arena->nextArena;

        if (arena->type == LargeAllocation)
            continue;

        if (!arena->HasUsedPages())
        {
            // Unlink from the list.
            RemoveArena(arena, arenaList, arenaLast);
            size_t addressSpace = arena->GetAddressSpaceSize();
            arena->FreeAddressSpace();
            m_pageCurReserve -= addressSpace / pageSize;

            delete arena;
        }
    }

    // The only arenas that may potentially be freed are in the queue of arenas with free pages.
    int size = static_cast<int>(singlePageArenasWithFreePages.size());
    for (int i = 0; i < size; i++)
    {
        SinglePageArena* arena = singlePageArenasWithFreePages.front();
        singlePageArenasWithFreePages.pop();

        if (arena->NumberOfFreePagesAvailable() == PAGES_PER_ARENA) // All the pages are free.
        {
            // Unlink from the list and delete the arena.
            addressToSinglePageArenaMap.erase(arena->pages);
            RemoveArena(arena, singlePageArenaList, singlePageArenaLast);
            size_t addressSpace = arena->GetAddressSpaceSize();
            arena->FreeAddressSpace(); // Sets arena->pages = NULL.
            m_pageCurReserve -= addressSpace / pageSize;

            delete arena;
        }
        else
        {
            singlePageArenasWithFreePages.push(arena); // Add the arena back to the queue if we didn't delete it.
        }
    }
}

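// Note on the second loop in FreeUnusedArenas above: singlePageArenasWithFreePages is a
// std::queue, so the loop rotates it exactly size() times -- each arena is popped once,
// examined, and either destroyed (when fully free) or pushed back, leaving the remaining
// queue contents intact.
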
/*
 * Free pages back to the page heap. The size must be the
 * same as when allocated.
 */
void PageHeap::FreePages(ProtectedEntityFlagsEnum entity, _Post_invalid_ void * p, size_t sz)
{
    CTinyGate gate(&lock); // Acquire the lock

    VSASSERT(sz % pageSize == 0 && sz != 0, "Invalid"); // Must be a page size multiple.

    m_pageCurUse -= sz / pageSize;

    size_t cPages = (sz >> pageShift);
    if (cPages == 1)
    {
        SinglePageFree(entity, p);
        return;
    }

    // Handle very large allocations differently.
    if (sz > BIGALLOC_SIZE)
    {
        LargeFree(p, sz);
        return;
    }

    // Find the arena this page is in.
    PageArena * arena = FindArena(p);
    VSASSERT(arena, "Invalid");

    FreePagesHelper(entity, arena, p, sz);
}

void PageHeap::FreePagesHelper(ProtectedEntityFlagsEnum entity, PageArena * arena, void * p, size_t sz)
{
    size_t cPages = (sz >> pageShift);

    // Get the page index within this arena.
    size_t initialPageIndex = ((BYTE *)p - (BYTE *)arena->pages) >> pageShift;

    // Pages must be in-use and committed. Set the pages to not-in-use. We could
    // decommit the pages here, but it is more efficient to keep them around
    // committed because we'll probably want them again. To actually decommit
    // them, call PageHeap::DecommitUnusedPages().
    size_t iPage = initialPageIndex;
    while (cPages--)
    {
        VSASSERT(arena->used[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK)), "Invalid");
        VSASSERT(arena->committed[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK)), "Invalid");

        arena->used[iPage >> DWORD_BIT_SHIFT] &= ~(1 << (iPage & DWORD_BIT_MASK));

        ++iPage;
    }

#ifdef DECOMMIT_ON_FREE

    iPage = initialPageIndex;
    BOOL b = VirtualFree((BYTE *)arena->pages + (iPage << pageShift), sz, MEM_DECOMMIT);
    ASSERT(b); // [....] Throw VcsException?
    size_t cPgs = sz >> pageShift;
    if (b)
    {
        while (cPgs--)
        {
            arena->ClearPageCommitted((unsigned int)iPage);
            iPage++;
        }
    }

#else

#ifdef DEBUG
    PageProtect::AllowWrite(entity, p, sz);

    // Fill the pages with junk to indicate unused memory.
    memset(p, 0xAE, sz);
#endif //DEBUG

    if (PageProtect::IsEntityProtected(ProtectedEntityFlags::UnusedMemory))
    {
        PageProtect::ForbidAccess(ProtectedEntityFlags::UnusedMemory, p, sz);
    }
    else
    {
        PageProtect::AllowWrite(entity, p, sz);
    }

#endif
}

/////////////////////////////////////////////////////////////////////////////////
// Allocate a very large allocation. An entire arena is allocated for the allocation.

void* PageHeap::LargeAlloc(size_t sz)
{
    CTinyGate gate(&lock); // Acquire the lock

    // Create an arena for this large allocation.
    PageArena* newArena = CreateArena(LargeAllocation, sz);

#ifdef DEBUG
    // Make sure the pages aren't zero filled.
    memset(newArena->pages, 0xCC, sz);
#endif //DEBUG

    return newArena->pages;
}

/*
 * Free a large allocation made via LargeAlloc.
 */
void PageHeap::LargeFree(void * p, size_t sz)
{
    CTinyGate gate(&lock); // Acquire the lock

    // Find the arena corresponding to this large allocation.
    PageArena * arena = FindArena(p);
    VSASSERT(arena && arena->type == LargeAllocation && arena->pages == p && arena->size == sz, "Invalid");

    m_pageCurReserve -= sz / pageSize;

    // Free the pages.
    BOOL b;
    b = VirtualFree(p, 0, MEM_RELEASE);
    VSASSERT(b, "Invalid");

    // Remove the arena from the arena list.
    if (arenaList == arena)
    {
        arenaList = arena->nextArena;
        if (arenaLast == arena)
            arenaLast = NULL;
    }
    else
    {
        PageArena * arenaPrev;

        // Find the arena just before the one we want to remove.
        for (arenaPrev = arenaList; arenaPrev->nextArena != arena; arenaPrev = arenaPrev->nextArena)
            ;

        VSASSERT(arenaPrev->nextArena == arena, "Invalid");
        arenaPrev->nextArena = arena->nextArena;
        if (arenaLast == arena)
            arenaLast = arenaPrev;
    }

    // Free the arena structure.
    delete arena;
}

/////////////////////////////////////////////////////////////////////////////////
// Allocate a single page.

void* PageHeap::SinglePageAlloc()
{
    void * p;
    SinglePageArena * arena;

    if (!singlePageArenasWithFreePages.empty())
    {
        // Any arena will work here; taking the first one each time is probably best for locality.
        arena = singlePageArenasWithFreePages.front();

        // Pop the top free page from our stack.
        int iPage = arena->freePageStack[arena->topOfFreePageStack];
        arena->topOfFreePageStack--;

        // Remove the arena from the singlePageArenasWithFreePages queue if we just used the last page.
        if (arena->NumberOfFreePagesAvailable() == 0)
        {
            singlePageArenasWithFreePages.pop();
        }

        p = arena->AllocPagesHelper(iPage, 1, *this);

        // Return the address of the allocated page.
        return p;
    }

    // No arenas have enough free space. Create a new arena and allocate
    // at the beginning of that arena.
    arena = (SinglePageArena*) CreateArena(SinglePageAllocation, PAGES_PER_ARENA * pageSize);

    int iPage = arena->freePageStack[arena->topOfFreePageStack];
    arena->topOfFreePageStack--;

    p = arena->AllocPagesHelper(iPage, 1, *this);

    return p;
}

/*
 * Free a single page allocation made via SinglePageAlloc.
 */
void PageHeap::SinglePageFree(ProtectedEntityFlagsEnum entity, _Post_invalid_ void * p)
{
    CTinyGate gate(&lock); // Acquire the lock

    // Find the arena this page belongs to.
    // Get the first arena whose starting memory address is greater than p, then go back one arena, because
    // p belongs to the closest arena whose first page is <= p, and upper_bound returns the first arena whose
    // first page is strictly greater than p.
    SinglePageArena* arena = (--addressToSinglePageArenaMap.upper_bound(p))->second;
    VSASSERT(arena && arena->size == PAGES_PER_ARENA * pageSize && arena->OwnsPage(p), "Invalid");

    // Mark the page as freed.
    FreePagesHelper(entity, arena, p, pageSize);

    // Push the page back onto our free page stack.
    int iPage = (int) ((BYTE *)p - (BYTE *)arena->pages) >> pageShift;
    ++arena->topOfFreePageStack;
    VSASSERT(arena->topOfFreePageStack < PAGES_PER_ARENA, "too many pages available");
    arena->freePageStack[arena->topOfFreePageStack] = iPage;

    // Add this arena back to our free list if we were full but now have a single free page.
    if (arena->NumberOfFreePagesAvailable() == 1)
    {
        singlePageArenasWithFreePages.push(arena);
    }
}

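// Worked example of the upper_bound lookup in SinglePageFree above: if the map holds arenas
// whose page blocks start at addresses A1 < A2 and p satisfies A1 <= p < A2, then
// addressToSinglePageArenaMap.upper_bound(p) returns the A2 entry (the first key strictly
// greater than p); stepping the iterator back once yields the A1 entry, which is the arena
// that actually owns p.
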
void FreeArenaList(PageHeap::PageArena* list, bool checkLeaks)
{
    PageHeap::PageArena * arena, *nextArena;

    for (arena = list; arena != NULL; arena = nextArena)
    {
        nextArena = arena->nextArena;

        // Check the arena for leaks, if desired.
        if (checkLeaks)
        {
            VSASSERT(arena->type != PageHeap::LargeAllocation, "Invalid"); // Large allocations should have been freed by now.

            for (int dwIndex = 0; dwIndex < PAGES_PER_ARENA / BITS_DWORD; ++dwIndex)
            {
                VSASSERT(arena->used[dwIndex] == 0, "Invalid"); // All pages in this arena should be free.
            }
        }

        // Free the pages in the arena.
        BOOL b;
        b = VirtualFree(arena->pages, 0, MEM_RELEASE);
        VSASSERT(b, "Invalid");

        // Free the arena structure.
        delete arena;
    }
}

/*
 * Free everything allocated by the page heap, optionally checking for
 * leaks (memory that hasn't been freed via FreePages).
 */
void PageHeap::FreeAllPages(bool checkLeaks)
{
    CTinyGate gate(&lock); // Acquire the lock

    FreeArenaList(arenaList, checkLeaks);
    FreeArenaList(singlePageArenaList, checkLeaks);

    m_pageCurUse = m_pageCurReserve = 0;
    arenaList = arenaLast = NULL;
    singlePageArenaList = singlePageArenaLast = NULL;

    addressToSinglePageArenaMap.clear();
    singlePageArenasWithFreePages = std::queue<SinglePageArena*>();
}

bool PageHeap::DecommitUnusedPagesFromArenaList(PageArena* list)
{
    bool anyDecommitted = false;

    PageArena * arena;
    BOOL b;

    for (arena = list; arena != NULL; arena = arena->nextArena)
    {
        if (arena->type == LargeAllocation)
            continue;

        for (int dwIndex = 0; dwIndex < PAGES_PER_ARENA / BITS_DWORD; ++dwIndex)
        {
            // Can we decommit 32 pages at once with one OS call?
            if (arena->used[dwIndex] == 0 && arena->committed[dwIndex] != 0)
            {
#pragma warning (push)
#pragma warning (disable: 6250)
                b = VirtualFree((BYTE *)arena->pages + ((dwIndex * BITS_DWORD) << pageShift),
                                BITS_DWORD << pageShift,
                                MEM_DECOMMIT);
#pragma warning (pop)
                VSASSERT(b, "Invalid");
                if (b)
                {
                    anyDecommitted = true;
                    arena->committed[dwIndex] = 0;
                }
            }
            else if (arena->used[dwIndex] != arena->committed[dwIndex])
            {
                // Some pages in this group should be decommitted. Check each one individually.
                for (int iPage = dwIndex * BITS_DWORD; iPage < (dwIndex + 1) * BITS_DWORD; ++iPage)
                {
                    if ( ! (arena->used[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK))) &&
                           (arena->committed[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK))))
                    {
#pragma warning (push)
#pragma warning (disable: 6250)
                        b = VirtualFree((BYTE *)arena->pages + (iPage << pageShift), pageSize, MEM_DECOMMIT);
#pragma warning (pop)
                        VSASSERT(b, "Invalid");
                        if (b)
                        {
                            anyDecommitted = true;
                            arena->committed[iPage >> DWORD_BIT_SHIFT] &= ~(1 << (iPage & DWORD_BIT_MASK));
                        }
                    }
                }
            }
        }

#ifdef DEBUG
        // At this point, the only committed pages in this arena should be in use.
        for (int dwIndex = 0; dwIndex < PAGES_PER_ARENA / BITS_DWORD; ++dwIndex)
        {
            VSASSERT(arena->used[dwIndex] == arena->committed[dwIndex], "Invalid");
        }
#endif //DEBUG
    }

    return anyDecommitted;
}

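// Note on the fast path in DecommitUnusedPagesFromArenaList above: used[] and committed[] are
// parallel bitmaps, so "used[dwIndex] == 0 && committed[dwIndex] != 0" means the whole 32-page
// group is free while at least one page in it is still committed. The entire group can then be
// returned with a single MEM_DECOMMIT call of BITS_DWORD << pageShift bytes (128K with the
// usual 4K pages), instead of up to 32 separate calls.
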
/*
 * Decommit any pages that aren't in use. Decommitted memory
 * can then be profitably used by other parts of the system.
 */
bool PageHeap::DecommitUnusedPages()
{
    CTinyGate gate(&lock); // Acquire the lock

    bool anyDecommitted = DecommitUnusedPagesFromArenaList(arenaList);
    anyDecommitted |= DecommitUnusedPagesFromArenaList(singlePageArenaList);

    return anyDecommitted;
}

PageHeap::SinglePageArena::SinglePageArena()
{
    // Push the arena's pages onto the free page stack.
    topOfFreePageStack = -1;
    for (int i = 0; i < PAGES_PER_ARENA; i++)
    {
        ++topOfFreePageStack;
        freePageStack[topOfFreePageStack] = i;
    }
}

/*
 * Create a new memory arena of the given size and reserve or commit pages for it.
 * If type == LargeAllocation, set this up as a "large allocation" arena and commit
 * the memory. If not, just reserve the memory.
 */
PageHeap::PageArena * PageHeap::CreateArena(PageArenaType type, size_t sz)
{
    // Allocate an arena and reserve pages for it.
    PageArena* newArena;
    SinglePageArena* newSinglePageArena = NULL;

    if (type == SinglePageAllocation)
    {
        newSinglePageArena = new (zeromemory) SinglePageArena;

        if (!singlePageArenaList)
        {
            singlePageArenaList = singlePageArenaLast = newSinglePageArena;
        }
        else
        {
            // Add to the front of the arena list.
            newSinglePageArena->nextArena = singlePageArenaList;
            singlePageArenaList = newSinglePageArena;
        }

        newArena = newSinglePageArena;
    }
    else
    {
        newArena = new (zeromemory) PageArena;

        // Add to the arena list. For efficiency, large allocation arenas are placed
        // at the end, but regular arenas at the beginning. This ensures that
        // regular allocations are almost always satisfied by the first arena in the list.
        if (!arenaList)
        {
            arenaList = arenaLast = newArena;
        }
        else if (type == LargeAllocation)
        {
            // Add to the end of the arena list.
            arenaLast->nextArena = newArena;
            arenaLast = newArena;
        }
        else
        {
            // Add to the front of the arena list.
            newArena->nextArena = arenaList;
            arenaList = newArena;
        }
    }

    newArena->pages = VirtualAlloc(0, sz, type == LargeAllocation ? MEM_COMMIT : MEM_RESERVE, PAGE_READWRITE);
    if (!newArena->pages)
    {
        VbThrow(GetLastHResultError());
    }

    if (newSinglePageArena)
    {
        // Also add the new SinglePageArena to our indexing data structures.
        VSASSERT(addressToSinglePageArenaMap.find(newSinglePageArena->pages) == addressToSinglePageArenaMap.end(),
                 "We shouldn't already have an arena for this address");
        addressToSinglePageArenaMap[newSinglePageArena->pages] = newSinglePageArena;
        singlePageArenasWithFreePages.push(newSinglePageArena);
    }

    m_pageCurReserve += sz / pageSize;
    if (m_pageCurReserve > m_pageMaxReserve)
    {
        m_pageMaxReserve = m_pageCurReserve;
    }

    newArena->size = sz;
    newArena->type = type;

    return newArena;
}

/*
 * Find the arena that contains a particular pointer.
 */
PageHeap::PageArena * PageHeap::FindArena(const void * p)
{
    PageArena * arena;

    for (arena = arenaList; arena != NULL; arena = arena->nextArena)
    {
        if (arena->OwnsPage(p))
            return arena;
    }

    VSASSERT(0, "Invalid"); // Should find the arena.
    return NULL;
}